Diffstat (limited to 'gcc/tree-vect-patterns.cc')
-rw-r--r-- | gcc/tree-vect-patterns.cc | 454
1 file changed, 406 insertions, 48 deletions
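
Context for the change below: the patch teaches the divmod pattern recognizer to handle the floor, ceil and round variants of integer division and modulo by a constant, by emitting the truncating div/mod plus compensation code. As a quick orientation (this sketch is not part of the patch; the function names are made up for illustration and it assumes C's truncating `/` and `%` on non-zero, non-overflowing int operands), the scalar identities the new compensation code implements are:

#include <cstdio>

// Round the quotient toward negative infinity.
static int floor_div (int x, int y)
{
  int q = x / y, r = x % y;
  // If the remainder is nonzero and the signs differ, round down.
  if (r != 0 && (x ^ y) < 0)
    q -= 1;
  return q;
}

// Remainder matching floor_div: result has the sign of y.
static int floor_mod (int x, int y)
{
  int r = x % y;
  if (r != 0 && (x ^ y) < 0)
    r += y;
  return r;
}

// Round the quotient toward positive infinity.
static int ceil_div (int x, int y)
{
  int q = x / y, r = x % y;
  // Round up when the remainder is nonzero and the signs agree.
  if (r != 0 && (x ^ y) >= 0)
    q += 1;
  return q;
}

// Round the quotient to nearest, halves away from zero.
static int round_div (int x, int y)
{
  int q = x / y, r = x % y;
  int abs_r = r < 0 ? -r : r;
  int abs_y = y < 0 ? -y : y;
  // Adjust when |r| exceeds (|y| - 1) / 2, in the direction of the sign of x/y.
  if (abs_r > (abs_y - 1) / 2)
    q += ((x ^ y) < 0) ? -1 : 1;
  return q;
}

int main ()
{
  // For x = -7, y = 3: trunc = -2, floor = -3, floor mod = 2, ceil = -2, round = -2.
  printf ("%d %d %d %d\n", floor_div (-7, 3), floor_mod (-7, 3),
	  ceil_div (-7, 3), round_div (-7, 3));
  return 0;
}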
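A second aside (also not part of the patch): the generated vector sequence tests (x ^ y) & (r | -r) < 0 instead of the two-condition form r != 0 && (x ^ y) < 0. For two's-complement integers, r | -r has its sign bit set exactly when r is nonzero, so a single vector compare-with-zero replaces two comparisons and a mask AND. A small self-check of that identity, assuming two's-complement int32_t and a nonzero divisor:

#include <cassert>
#include <cstdint>
#include <initializer_list>

// Fused test used by the pattern: true iff r != 0 and x, y have opposite signs.
static bool needs_floor_adjust (int32_t x, int32_t y)
{
  int32_t r = x % y;
  // Sign bit of (r | -r) is set iff r != 0; ANDing with (x ^ y) keeps the
  // sign bit only when the operands also have opposite signs.
  int32_t fused = (x ^ y) & (r | -r);
  return fused < 0;
}

int main ()
{
  for (int32_t x = -20; x <= 20; ++x)
    for (int32_t y : { -7, -3, 3, 7 })
      {
	int32_t r = x % y;
	bool branchy = (r != 0) && ((x ^ y) < 0);
	assert (needs_floor_adjust (x, y) == branchy);
      }
  return 0;
}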
diff --git a/gcc/tree-vect-patterns.cc b/gcc/tree-vect-patterns.cc
index becee62..878a045 100644
--- a/gcc/tree-vect-patterns.cc
+++ b/gcc/tree-vect-patterns.cc
@@ -2542,8 +2542,8 @@ vect_recog_widen_sum_pattern (vec_info *vinfo,
 
   vect_pattern_detected ("vect_recog_widen_sum_pattern", last_stmt);
 
-  if (!vect_supportable_direct_optab_p (vinfo, type, WIDEN_SUM_EXPR,
-                                        unprom0.type, type_out))
+  if (!vect_supportable_conv_optab_p (vinfo, type, WIDEN_SUM_EXPR,
+                                      unprom0.type, type_out))
     return NULL;
 
   var = vect_recog_temp_ssa_var (type, NULL);
@@ -3001,7 +3001,7 @@ vect_recog_over_widening_pattern (vec_info *vinfo,
   tree_code code = gimple_assign_rhs_code (last_stmt);
 
   /* Punt for reductions where we don't handle the type conversions.  */
-  if (STMT_VINFO_DEF_TYPE (last_stmt_info) == vect_reduction_def)
+  if (vect_is_reduction (last_stmt_info))
     return NULL;
 
   /* Keep the first operand of a COND_EXPR as-is: only the other two
@@ -4838,6 +4838,281 @@ vect_recog_sat_trunc_pattern (vec_info *vinfo, stmt_vec_info stmt_vinfo,
   return NULL;
 }
 
+
+/* Function add_code_for_floorceilround_divmod
+   A helper function to add compensation code for implementing FLOOR_MOD_EXPR,
+   FLOOR_DIV_EXPR, CEIL_MOD_EXPR, CEIL_DIV_EXPR, ROUND_MOD_EXPR and
+   ROUND_DIV_EXPR.
+   The quotient and remainder are needed to implement these operators.
+   FLOOR cases
+     r = x %[fl] y;	d = x /[fl] y;
+   is
+     r = x % y; if (r && (x ^ y) < 0) r += y;
+     r = x % y; d = x/y; if (r && (x ^ y) < 0) d--;  respectively
+   Produce the following sequence:
+     v0 = x ^ y
+     v1 = -r
+     v2 = r | -r
+     v3 = v0 & v2
+     v4 = v3 < 0
+     if (floor_mod)
+       v5 = v4 ? y : 0
+       v6 = r + v5
+     if (floor_div)
+       v5 = v4 ? 1 : 0
+       v6 = d - v5
+   Similar sequences of vector instructions are produced for the following
+   cases:
+   CEIL cases
+     r = x %[cl] y;	d = x /[cl] y;
+   is
+     r = x % y; if (r && (x ^ y) >= 0) r -= y;
+     r = x % y; if (r) r -= y;  (unsigned)
+     r = x % y; d = x/y; if (r && (x ^ y) >= 0) d++;
+     r = x % y; d = x/y; if (r) d++;  (unsigned)
+   ROUND cases
+     r = x %[rd] y;	d = x /[rd] y;
+   is
+     r = x % y; if (r > ((y-1)/2)) if ((x ^ y) >= 0) r -= y; else r += y;
+     r = x % y; if (r > ((y-1)/2)) r -= y;  (unsigned)
+     r = x % y; d = x/y; if (r > ((y-1)/2)) if ((x ^ y) >= 0) d++; else d--;
+     r = x % y; d = x/y; if (r > ((y-1)/2)) d++;  (unsigned)
+   Inputs:
+     VECTYPE: Vector type of the operands
+     STMT_VINFO: Statement where the pattern begins
+     RHS_CODE: One of FLOOR_MOD_EXPR, FLOOR_DIV_EXPR, CEIL_MOD_EXPR,
+               CEIL_DIV_EXPR, ROUND_MOD_EXPR or ROUND_DIV_EXPR
+     Q: The quotient of the division
+     R: The remainder of the division
+     OPRND0/OPRND1: The actual operands involved
+     ITYPE: tree type of OPRND0
+   Output:
+     NULL if vectorization is not possible, otherwise the final gimple
+     statement based on RHS_CODE.  */
+static gimple *
+add_code_for_floorceilround_divmod (tree vectype, vec_info *vinfo,
+                                    stmt_vec_info stmt_vinfo,
+                                    enum tree_code rhs_code, tree q, tree r,
+                                    tree oprnd0, tree oprnd1, tree itype)
+{
+  gimple *def_stmt;
+  tree mask_vectype = truth_type_for (vectype);
+  if (!mask_vectype)
+    return NULL;
+  tree bool_cond;
+  bool unsigned_p = TYPE_UNSIGNED (itype);
+
+  switch (rhs_code)
+    {
+    case FLOOR_MOD_EXPR:
+    case FLOOR_DIV_EXPR:
+    case CEIL_MOD_EXPR:
+    case CEIL_DIV_EXPR:
+      {
+        if (!target_has_vecop_for_code (NEGATE_EXPR, vectype)
+            || !target_has_vecop_for_code (BIT_XOR_EXPR, vectype)
+            || !target_has_vecop_for_code (BIT_IOR_EXPR, vectype)
+            || !target_has_vecop_for_code (PLUS_EXPR, vectype)
+            || !target_has_vecop_for_code (MINUS_EXPR, vectype)
+            || !expand_vec_cmp_expr_p (vectype, mask_vectype, LT_EXPR)
+            || !expand_vec_cond_expr_p (vectype, mask_vectype))
+          return NULL;
+        if (unsigned_p)
+          {
+            gcc_assert (rhs_code == CEIL_MOD_EXPR || rhs_code == CEIL_DIV_EXPR);
+
+            if (!expand_vec_cmp_expr_p (vectype, mask_vectype, GT_EXPR))
+              return NULL;
+            bool is_mod = rhs_code == CEIL_MOD_EXPR;
+            // r > 0
+            bool_cond = vect_recog_temp_ssa_var (boolean_type_node, NULL);
+            def_stmt = gimple_build_assign (bool_cond, GT_EXPR, r,
+                                            build_int_cst (itype, 0));
+            append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt, mask_vectype,
+                                    itype);
+
+            // (r > 0) ? y : 0 (mod)
+            // (r > 0) ? 1 : 0 (div)
+            tree extr_cond = vect_recog_temp_ssa_var (itype, NULL);
+            def_stmt
+              = gimple_build_assign (extr_cond, COND_EXPR, bool_cond,
+                                     is_mod ? oprnd1 : build_int_cst (itype, 1),
+                                     build_int_cst (itype, 0));
+            append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
+
+            // r -= (r > 0) ? y : 0 (mod)
+            // d += (r > 0) ? 1 : 0 (div)
+            tree result = vect_recog_temp_ssa_var (itype, NULL);
+            return gimple_build_assign (result, is_mod ? MINUS_EXPR : PLUS_EXPR,
+                                        is_mod ? r : q, extr_cond);
+          }
+        else
+          {
+            bool ceil_p
+              = (rhs_code == CEIL_MOD_EXPR || rhs_code == CEIL_DIV_EXPR);
+            if (ceil_p && !target_has_vecop_for_code (BIT_NOT_EXPR, vectype))
+              return NULL;
+            // x ^ y
+            tree xort = vect_recog_temp_ssa_var (itype, NULL);
+            def_stmt = gimple_build_assign (xort, BIT_XOR_EXPR, oprnd0, oprnd1);
+            append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
+
+            tree cond_reg = xort;
+            // ~(x ^ y) (ceil)
+            if (ceil_p)
+              {
+                cond_reg = vect_recog_temp_ssa_var (itype, NULL);
+                def_stmt = gimple_build_assign (cond_reg, BIT_NOT_EXPR, xort);
+                append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
+              }
+
+            // -r
+            tree negate_r = vect_recog_temp_ssa_var (itype, NULL);
+            def_stmt = gimple_build_assign (negate_r, NEGATE_EXPR, r);
+            append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
+
+            // r | -r, sign bit is set if r != 0
+            tree r_or_negr = vect_recog_temp_ssa_var (itype, NULL);
+            def_stmt
+              = gimple_build_assign (r_or_negr, BIT_IOR_EXPR, r, negate_r);
+            append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
+
+            // (x ^ y) & (r | -r)
+            // ~(x ^ y) & (r | -r) (ceil)
+            tree r_or_negr_and_xor = vect_recog_temp_ssa_var (itype, NULL);
+            def_stmt = gimple_build_assign (r_or_negr_and_xor, BIT_AND_EXPR,
+                                            r_or_negr, cond_reg);
+            append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
+
+            // (x ^ y) & (r | -r) < 0, which is equivalent to (x^y < 0 && r != 0)
+            bool_cond = vect_recog_temp_ssa_var (boolean_type_node, NULL);
+            def_stmt
+              = gimple_build_assign (bool_cond, LT_EXPR, r_or_negr_and_xor,
+                                     build_int_cst (itype, 0));
+            append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt, mask_vectype,
+                                    itype);
+
+            // (x^y < 0 && r) ? y : 0 (mod)
+            // (x^y < 0 && r) ? -1 : 0 (div)
+            bool is_mod
+              = (rhs_code == FLOOR_MOD_EXPR || rhs_code == CEIL_MOD_EXPR);
+            tree extr_cond = vect_recog_temp_ssa_var (itype, NULL);
+            def_stmt = gimple_build_assign (extr_cond, COND_EXPR, bool_cond,
+                                            is_mod ? oprnd1
+                                                   : build_int_cst (itype, -1),
+                                            build_int_cst (itype, 0));
+            append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
+
+            // r += (x^y < 0 && r) ? y : 0 (floor mod)
+            // d += (x^y < 0 && r) ? -1 : 0 (floor div)
+            // r -= (x^y < 0 && r) ? y : 0 (ceil mod)
+            // d -= (x^y < 0 && r) ? -1 : 0 (ceil div)
+            tree result = vect_recog_temp_ssa_var (itype, NULL);
+            return gimple_build_assign (result,
+                                        (rhs_code == FLOOR_MOD_EXPR
+                                         || rhs_code == FLOOR_DIV_EXPR)
+                                          ? PLUS_EXPR
+                                          : MINUS_EXPR,
+                                        is_mod ? r : q, extr_cond);
+          }
+      }
+    case ROUND_MOD_EXPR:
+    case ROUND_DIV_EXPR:
+      {
+        if (!target_has_vecop_for_code (BIT_AND_EXPR, vectype)
+            || !target_has_vecop_for_code (PLUS_EXPR, vectype)
+            || !expand_vec_cmp_expr_p (vectype, mask_vectype, LT_EXPR)
+            || !expand_vec_cmp_expr_p (vectype, mask_vectype, GT_EXPR)
+            || !expand_vec_cond_expr_p (vectype, mask_vectype))
+          return NULL;
+
+        bool is_mod = rhs_code == ROUND_MOD_EXPR;
+        HOST_WIDE_INT d = TREE_INT_CST_LOW (oprnd1);
+        unsigned HOST_WIDE_INT abs_d
+          = (d >= 0 ? (unsigned HOST_WIDE_INT) d : -(unsigned HOST_WIDE_INT) d);
+        unsigned HOST_WIDE_INT mid_d = (abs_d - 1) >> 1;
+        if (!unsigned_p)
+          {
+            // check availability of abs expression for the vector type
+            if (!target_has_vecop_for_code (ABS_EXPR, vectype))
+              return NULL;
+            // abs (r)
+            tree abs_r = vect_recog_temp_ssa_var (itype, NULL);
+            def_stmt = gimple_build_assign (abs_r, ABS_EXPR, r);
+            append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
+
+            // abs (r) > ((abs (y) - 1) >> 1)
+            tree round_p = vect_recog_temp_ssa_var (boolean_type_node, NULL);
+            def_stmt = gimple_build_assign (round_p, GT_EXPR, abs_r,
+                                            build_int_cst (itype, mid_d));
+            append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt, mask_vectype,
+                                    itype);
+
+            // x ^ y
+            tree cond_reg = vect_recog_temp_ssa_var (itype, NULL);
+            def_stmt
+              = gimple_build_assign (cond_reg, BIT_XOR_EXPR, oprnd0, oprnd1);
+            append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
+
+            // x ^ y < 0
+            bool_cond = vect_recog_temp_ssa_var (boolean_type_node, NULL);
+            def_stmt = gimple_build_assign (bool_cond, LT_EXPR, cond_reg,
+                                            build_int_cst (itype, 0));
+            append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt, mask_vectype,
+                                    itype);
+
+            // x ^ y < 0 ? y : -y (mod)
+            // x ^ y < 0 ? -1 : 1 (div)
+            tree val1 = vect_recog_temp_ssa_var (itype, NULL);
+            def_stmt
+              = gimple_build_assign (val1, COND_EXPR, bool_cond,
+                                     build_int_cst (itype, is_mod ? d : -1),
+                                     build_int_cst (itype, is_mod ? -d : 1));
+            append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
+            int precision = TYPE_PRECISION (itype);
+            wide_int wmask = wi::mask (precision, false, precision);
+
+            // abs (r) > ((abs (y) - 1) >> 1) ? 0xffffffff : 0
+            tree val2 = vect_recog_temp_ssa_var (itype, NULL);
+            def_stmt = gimple_build_assign (val2, COND_EXPR, round_p,
+                                            wide_int_to_tree (itype, wmask),
+                                            build_int_cst (itype, 0));
+            append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
+
+            tree fval = vect_recog_temp_ssa_var (itype, NULL);
+            def_stmt = gimple_build_assign (fval, BIT_AND_EXPR, val1, val2);
+            append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
+
+            tree result = vect_recog_temp_ssa_var (itype, NULL);
+            return gimple_build_assign (result, PLUS_EXPR, is_mod ? r : q,
+                                        fval);
+          }
+        else
+          {
+            // r > ((y-1) >> 1)
+            tree round_p = vect_recog_temp_ssa_var (boolean_type_node, NULL);
+            def_stmt = gimple_build_assign (round_p, GT_EXPR, r,
                                            build_int_cst (itype, mid_d));
+            append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt, mask_vectype,
+                                    itype);
+
+            // (r > ((y-1) >> 1)) ? -d : 0 (mod)
+            // (r > ((y-1) >> 1)) ? 1 : 0 (div)
+            tree val2 = vect_recog_temp_ssa_var (itype, NULL);
+            def_stmt
+              = gimple_build_assign (val2, COND_EXPR, round_p,
+                                     build_int_cst (itype, is_mod ? -d : 1),
+                                     build_int_cst (itype, 0));
+            append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
+
+            tree result = vect_recog_temp_ssa_var (itype, NULL);
+            return gimple_build_assign (result, PLUS_EXPR, is_mod ? r : q,
+                                        val2);
+          }
+      }
+    default:
+      return NULL;
+    }
+}
+
 /* Detect a signed division by a constant that wouldn't be
    otherwise vectorized:
@@ -4882,7 +5157,8 @@ vect_recog_divmod_pattern (vec_info *vinfo,
 {
   gimple *last_stmt = stmt_vinfo->stmt;
   tree oprnd0, oprnd1, vectype, itype, cond;
-  gimple *pattern_stmt, *def_stmt;
+  gimple *pattern_stmt = NULL;
+  gimple *def_stmt = NULL;
   enum tree_code rhs_code;
   optab optab;
   tree q, cst;
@@ -4899,6 +5175,12 @@ vect_recog_divmod_pattern (vec_info *vinfo,
     case TRUNC_DIV_EXPR:
     case EXACT_DIV_EXPR:
     case TRUNC_MOD_EXPR:
+    case FLOOR_MOD_EXPR:
+    case FLOOR_DIV_EXPR:
+    case CEIL_MOD_EXPR:
+    case CEIL_DIV_EXPR:
+    case ROUND_MOD_EXPR:
+    case ROUND_DIV_EXPR:
       break;
     default:
       return NULL;
@@ -4930,9 +5212,16 @@ vect_recog_divmod_pattern (vec_info *vinfo,
     }
 
   prec = TYPE_PRECISION (itype);
+
+  bool is_flclrd_moddiv_p
+    = rhs_code == FLOOR_MOD_EXPR || rhs_code == FLOOR_DIV_EXPR
+      || rhs_code == CEIL_MOD_EXPR || rhs_code == CEIL_DIV_EXPR
+      || rhs_code == ROUND_MOD_EXPR || rhs_code == ROUND_DIV_EXPR;
   if (integer_pow2p (oprnd1))
     {
-      if (TYPE_UNSIGNED (itype) || tree_int_cst_sgn (oprnd1) != 1)
+      if ((TYPE_UNSIGNED (itype)
+           && (rhs_code == FLOOR_MOD_EXPR || rhs_code == FLOOR_DIV_EXPR))
+          || tree_int_cst_sgn (oprnd1) != 1)
         return NULL;
 
       /* Pattern detected.  */
@@ -4949,18 +5238,27 @@ vect_recog_divmod_pattern (vec_info *vinfo,
          tree var_div = vect_recog_temp_ssa_var (itype, NULL);
          gimple *div_stmt = gimple_build_call_internal (ifn, 2, oprnd0, shift);
          gimple_call_set_lhs (div_stmt, var_div);
-
-         if (rhs_code == TRUNC_MOD_EXPR)
+         if (rhs_code == TRUNC_MOD_EXPR || is_flclrd_moddiv_p)
            {
              append_pattern_def_seq (vinfo, stmt_vinfo, div_stmt);
+             tree t1 = vect_recog_temp_ssa_var (itype, NULL);
              def_stmt
-               = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
-                                      LSHIFT_EXPR, var_div, shift);
+               = gimple_build_assign (t1, LSHIFT_EXPR, var_div, shift);
              append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
              pattern_stmt
                = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
-                                      MINUS_EXPR, oprnd0,
-                                      gimple_assign_lhs (def_stmt));
+                                      MINUS_EXPR, oprnd0, t1);
+             if (is_flclrd_moddiv_p)
+               {
+                 append_pattern_def_seq (vinfo, stmt_vinfo, pattern_stmt);
+                 pattern_stmt
+                   = add_code_for_floorceilround_divmod (vectype, vinfo,
+                                                         stmt_vinfo, rhs_code,
+                                                         var_div, t1, oprnd0,
+                                                         oprnd1, itype);
+                 if (pattern_stmt == NULL)
+                   return NULL;
+               }
            }
          else
            pattern_stmt = div_stmt;
@@ -4974,8 +5272,12 @@ vect_recog_divmod_pattern (vec_info *vinfo,
                                      build_int_cst (itype, 0));
      append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt,
                              truth_type_for (vectype), itype);
+     tree div_result = NULL_TREE;
      if (rhs_code == TRUNC_DIV_EXPR
-         || rhs_code == EXACT_DIV_EXPR)
+         || rhs_code == EXACT_DIV_EXPR
+         || rhs_code == FLOOR_DIV_EXPR
+         || rhs_code == CEIL_DIV_EXPR
+         || rhs_code == ROUND_DIV_EXPR)
        {
          tree var = vect_recog_temp_ssa_var (itype, NULL);
          tree shift;
@@ -4992,12 +5294,17 @@ vect_recog_divmod_pattern (vec_info *vinfo,
          append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
 
          shift = build_int_cst (itype, tree_log2 (oprnd1));
+         div_result = vect_recog_temp_ssa_var (itype, NULL);
          pattern_stmt
-           = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
-                                  RSHIFT_EXPR, var, shift);
+           = gimple_build_assign (div_result, RSHIFT_EXPR, var, shift);
        }
-     else
+     if (rhs_code == TRUNC_MOD_EXPR || is_flclrd_moddiv_p)
        {
+         if (rhs_code == FLOOR_DIV_EXPR
+             || rhs_code == CEIL_DIV_EXPR
+             || rhs_code == ROUND_DIV_EXPR)
+           append_pattern_def_seq (vinfo, stmt_vinfo, pattern_stmt);
+
          tree signmask;
          if (compare_tree_int (oprnd1, 2) == 0)
            {
@@ -5042,10 +5349,21 @@ vect_recog_divmod_pattern (vec_info *vinfo,
                                             build_int_cst (itype, 1)));
              append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt);
 
+             tree r = vect_recog_temp_ssa_var (itype, NULL);
              pattern_stmt
-               = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
-                                      MINUS_EXPR, gimple_assign_lhs (def_stmt),
+               = gimple_build_assign (r, MINUS_EXPR, gimple_assign_lhs (def_stmt),
                                       signmask);
+             if (is_flclrd_moddiv_p)
+               {
+                 append_pattern_def_seq (vinfo, stmt_vinfo, pattern_stmt);
+                 pattern_stmt
+                   = add_code_for_floorceilround_divmod (vectype, vinfo,
+                                                         stmt_vinfo, rhs_code,
+                                                         div_result, r, oprnd0,
+                                                         oprnd1, itype);
+                 if (pattern_stmt == NULL)
+                   return NULL;
+               }
        }
 
      return pattern_stmt;
@@ -5352,7 +5670,7 @@ vect_recog_divmod_pattern (vec_info *vinfo,
        }
     }
 
-  if (rhs_code == TRUNC_MOD_EXPR)
+  if (rhs_code == TRUNC_MOD_EXPR || is_flclrd_moddiv_p)
     {
       tree r, t1;
@@ -5367,6 +5685,17 @@ vect_recog_divmod_pattern (vec_info *vinfo,
 
       r = vect_recog_temp_ssa_var (itype, NULL);
       pattern_stmt = gimple_build_assign (r, MINUS_EXPR, oprnd0, t1);
+
+      if (is_flclrd_moddiv_p)
+        {
+          append_pattern_def_seq (vinfo, stmt_vinfo, pattern_stmt);
+          pattern_stmt
+            = add_code_for_floorceilround_divmod (vectype, vinfo, stmt_vinfo,
+                                                  rhs_code, q, r, oprnd0, oprnd1,
+                                                  itype);
+          if (pattern_stmt == NULL)
+            return NULL;
+        }
     }
 
   /* Pattern detected.  */
@@ -6828,13 +7157,14 @@ possible_vector_mask_operation_p (stmt_vec_info stmt_info)
 
 /* If STMT_INFO sets a boolean SSA_NAME, see whether we should use
    a vector mask type instead of a normal vector type.  Record the
-   result in STMT_INFO->mask_precision.  */
+   result in STMT_INFO->mask_precision.  Returns true when the
+   precision changed.  */
 
-static void
+static bool
 vect_determine_mask_precision (vec_info *vinfo, stmt_vec_info stmt_info)
 {
   if (!possible_vector_mask_operation_p (stmt_info))
-    return;
+    return false;
 
   /* If at least one boolean input uses a vector mask type,
      pick the mask type with the narrowest elements.
@@ -6916,8 +7246,11 @@ vect_determine_mask_precision (vec_info *vinfo, stmt_vec_info stmt_info)
          scalar_mode mode;
          tree vectype, mask_type;
          if (is_a <scalar_mode> (TYPE_MODE (op0_type), &mode)
-             && (vectype = get_vectype_for_scalar_type (vinfo, op0_type))
-             && (mask_type = get_mask_type_for_scalar_type (vinfo, op0_type))
+             /* Do not allow this to set vinfo->vector_mode, this might
+                disrupt the result for the next iteration.  */
+             && (vectype = get_related_vectype_for_scalar_type
+                             (vinfo->vector_mode, op0_type))
+             && (mask_type = truth_type_for (vectype))
              && expand_vec_cmp_expr_p (vectype, mask_type, code))
            precision = GET_MODE_BITSIZE (mode);
@@ -6943,19 +7276,30 @@ vect_determine_mask_precision (vec_info *vinfo, stmt_vec_info stmt_info)
        }
     }
 
-  if (dump_enabled_p ())
+  if (stmt_info->mask_precision != precision)
     {
-      if (precision == ~0U)
-       dump_printf_loc (MSG_NOTE, vect_location,
-                        "using normal nonmask vectors for %G",
-                        stmt_info->stmt);
-      else
-       dump_printf_loc (MSG_NOTE, vect_location,
-                        "using boolean precision %d for %G",
-                        precision, stmt_info->stmt);
-    }
+      if (dump_enabled_p ())
+        {
+          if (precision == ~0U)
+            dump_printf_loc (MSG_NOTE, vect_location,
+                             "using normal nonmask vectors for %G",
+                             stmt_info->stmt);
+          else
+            dump_printf_loc (MSG_NOTE, vect_location,
+                             "using boolean precision %d for %G",
+                             precision, stmt_info->stmt);
+        }
 
-  stmt_info->mask_precision = precision;
+      /* ??? We'd like to assert stmt_info->mask_precision == 0
+         || stmt_info->mask_precision > precision, so that we only
+         decrease mask precisions throughout iteration, but the
+         tcc_comparison handling above means for comparisons of bools
+         we start with 8 but might increase in case the bools get mask
+         precision on their own.  */
+      stmt_info->mask_precision = precision;
+      return true;
+    }
+  return false;
 }
 
 /* Handle vect_determine_precisions for STMT_INFO, given that we
@@ -6988,22 +7332,33 @@ vect_determine_precisions (vec_info *vinfo)
 
   DUMP_VECT_SCOPE ("vect_determine_precisions");
 
-  for (unsigned int i = 0; i < nbbs; i++)
+  /* For mask precisions we have to iterate since otherwise we do not
+     get reduction PHI precision correct.  For now do this only for
+     loop vectorization.  */
+  bool changed;
+  do
     {
-      basic_block bb = bbs[i];
-      for (auto gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+      changed = false;
+      for (unsigned int i = 0; i < nbbs; i++)
        {
-         stmt_vec_info stmt_info = vinfo->lookup_stmt (gsi.phi ());
-         if (stmt_info && STMT_VINFO_VECTORIZABLE (stmt_info))
-           vect_determine_mask_precision (vinfo, stmt_info);
-       }
-      for (auto gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
-       {
-         stmt_vec_info stmt_info = vinfo->lookup_stmt (gsi_stmt (gsi));
-         if (stmt_info && STMT_VINFO_VECTORIZABLE (stmt_info))
-           vect_determine_mask_precision (vinfo, stmt_info);
+         basic_block bb = bbs[i];
+         for (auto gsi = gsi_start_phis (bb);
+              !gsi_end_p (gsi); gsi_next (&gsi))
+           {
+             stmt_vec_info stmt_info = vinfo->lookup_stmt (gsi.phi ());
+             if (stmt_info && STMT_VINFO_VECTORIZABLE (stmt_info))
+               changed |= vect_determine_mask_precision (vinfo, stmt_info);
+           }
+         for (auto gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+           {
+             stmt_vec_info stmt_info = vinfo->lookup_stmt (gsi_stmt (gsi));
+             if (stmt_info && STMT_VINFO_VECTORIZABLE (stmt_info))
+               changed |= vect_determine_mask_precision (vinfo, stmt_info);
+           }
        }
     }
+  while (changed && is_a <loop_vec_info> (vinfo));
+
   for (unsigned int i = 0; i < nbbs; i++)
     {
       basic_block bb = bbs[nbbs - i - 1];
@@ -7188,14 +7543,17 @@ vect_mark_pattern_stmts (vec_info *vinfo,
                break;
              }
          /* Try harder to find a mid-entry into an earlier pattern
-            sequence.  This means that the initial 'lookfor' was
+            sequence.  Likewise an entry to a stmt skipping a conversion
+            on an input.  This means that the initial 'lookfor' was
             bogus.  */
          if (!found)
            {
              for (unsigned i = 0; i < op.num_ops; ++i)
                if (TREE_CODE (op.ops[i]) == SSA_NAME)
                  if (auto def = vinfo->lookup_def (op.ops[i]))
-                   if (vect_is_reduction (def))
+                   if (vect_is_reduction (def)
+                       || (is_a <gphi *> (def->stmt)
+                           && STMT_VINFO_REDUC_DEF (def) != NULL))
                      {
                        STMT_VINFO_REDUC_IDX (vinfo->lookup_stmt (s)) = i;
                        lookfor = gimple_get_lhs (s);