author     Richard Sandiford <richard.sandiford@linaro.org>   2018-07-12 13:02:00 +0000
committer  Richard Sandiford <rsandifo@gcc.gnu.org>           2018-07-12 13:02:00 +0000
commit     2c58d42c3ed599b4c2976fc173eefd8e016ea216 (patch)
tree       1e031aeefba8cc358f1917d8f4088473cf5a38d7 /gcc/tree-vect-patterns.c
parent     0936858f081b77319f8f6e5825dc86d2861d0445 (diff)
download   gcc-2c58d42c3ed599b4c2976fc173eefd8e016ea216.zip
           gcc-2c58d42c3ed599b4c2976fc173eefd8e016ea216.tar.gz
           gcc-2c58d42c3ed599b4c2976fc173eefd8e016ea216.tar.bz2
Use conditional internal functions in if-conversion
This patch uses IFN_COND_* to vectorise conditionally-executed,
potentially-trapping arithmetic, such as most floating-point ops with
-ftrapping-math.  E.g.:

    if (cond) { ... x = a + b; ... }

becomes:

    ...
    x = .COND_ADD (cond, a, b, else_value);
    ...

When this transformation is done on its own, the value of x for !cond
isn't important, so else_value is simply the target's
preferred_else_value (i.e. the value it can handle the most efficiently).

However, the patch also looks for the equivalent of:

    y = cond ? x : c;

in which the "then" value is the result of the conditionally-executed
operation and the "else" value "c" is some value that is available at x.
In that case we can instead use:

    x = .COND_ADD (cond, a, b, c);

and replace uses of y with uses of x.  The patch also looks for:

    y = !cond ? c : x;

which can be transformed in the same way.  This involved adding a new
utility function inverse_conditions_p, which was already open-coded in
a more limited way in match.pd.

2018-07-12  Richard Sandiford  <richard.sandiford@linaro.org>

gcc/
	* fold-const.h (inverse_conditions_p): Declare.
	* fold-const.c (inverse_conditions_p): New function.
	* match.pd: Use inverse_conditions_p.  Add folds of view_converts
	that test the inverse condition of a conditional internal function.
	* internal-fn.h (vectorized_internal_fn_supported_p): Declare.
	* internal-fn.c (internal_fn_mask_index): Handle conditional
	internal functions.
	(vectorized_internal_fn_supported_p): New function.
	* tree-if-conv.c: Include internal-fn.h and fold-const.h.
	(any_pred_load_store): Replace with...
	(need_to_predicate): ...this new variable.
	(redundant_ssa_names): New variable.
	(ifcvt_can_use_mask_load_store): Move initial checks to...
	(ifcvt_can_predicate): ...this new function.  Handle tree codes
	for which a conditional internal function exists.
	(if_convertible_gimple_assign_stmt_p): Use ifcvt_can_predicate
	instead of ifcvt_can_use_mask_load_store.  Update after variable
	name change.
	(predicate_load_or_store): New function, split out from
	predicate_mem_writes.
	(check_redundant_cond_expr): New function.
	(value_available_p): Likewise.
	(predicate_rhs_code): Likewise.
	(predicate_mem_writes): Rename to...
	(predicate_statements): ...this.  Use predicate_load_or_store
	and predicate_rhs_code.
	(combine_blocks, tree_if_conversion): Update after above name
	changes.
	(ifcvt_local_dce): Handle redundant_ssa_names.
	* tree-vect-patterns.c (vect_recog_mask_conversion_pattern): Handle
	general conditional functions.
	* tree-vect-stmts.c (vectorizable_call): Likewise.

gcc/testsuite/
	* gcc.dg/vect/vect-cond-arith-4.c: New test.
	* gcc.dg/vect/vect-cond-arith-5.c: Likewise.
	* gcc.target/aarch64/sve/cond_arith_1.c: Likewise.
	* gcc.target/aarch64/sve/cond_arith_1_run.c: Likewise.
	* gcc.target/aarch64/sve/cond_arith_2.c: Likewise.
	* gcc.target/aarch64/sve/cond_arith_2_run.c: Likewise.
	* gcc.target/aarch64/sve/cond_arith_3.c: Likewise.
	* gcc.target/aarch64/sve/cond_arith_3_run.c: Likewise.

From-SVN: r262589
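For concreteness, a loop of the following shape is the kind of code this
enables (a minimal sketch in the spirit of the new vect-cond-arith tests,
not a copy of them; the function and variable names are invented):

    /* The add only executes when cond[i] is nonzero, so hoisting it out
       of the branch could raise a spurious FP exception under
       -ftrapping-math.  With this patch, if-conversion can instead emit
         x[i] = .COND_ADD (cond[i] != 0, a[i], b[i], else_value);
       which the vectoriser can turn into a masked vector add.  */
    void
    f (double *restrict x, double *restrict a, double *restrict b,
       int *restrict cond, int n)
    {
      for (int i = 0; i < n; ++i)
        if (cond[i])
          x[i] = a[i] + b[i];
    }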
Diffstat (limited to 'gcc/tree-vect-patterns.c')
-rw-r--r--   gcc/tree-vect-patterns.c   71
1 file changed, 37 insertions(+), 34 deletions(-)
diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c
index 8e28950..4c22afd 100644
--- a/gcc/tree-vect-patterns.c
+++ b/gcc/tree-vect-patterns.c
@@ -3905,65 +3905,68 @@ vect_recog_mask_conversion_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
/* Check for MASK_LOAD and MASK_STORE calls requiring mask conversion. */
if (is_gimple_call (last_stmt)
- && gimple_call_internal_p (last_stmt)
- && (gimple_call_internal_fn (last_stmt) == IFN_MASK_STORE
- || gimple_call_internal_fn (last_stmt) == IFN_MASK_LOAD))
+ && gimple_call_internal_p (last_stmt))
{
gcall *pattern_stmt;
- bool load = (gimple_call_internal_fn (last_stmt) == IFN_MASK_LOAD);
- if (load)
+ internal_fn ifn = gimple_call_internal_fn (last_stmt);
+ int mask_argno = internal_fn_mask_index (ifn);
+ if (mask_argno < 0)
+ return NULL;
+
+ bool store_p = internal_store_fn_p (ifn);
+ if (store_p)
{
- lhs = gimple_call_lhs (last_stmt);
- vectype1 = get_vectype_for_scalar_type (TREE_TYPE (lhs));
+ int rhs_index = internal_fn_stored_value_index (ifn);
+ tree rhs = gimple_call_arg (last_stmt, rhs_index);
+ vectype1 = get_vectype_for_scalar_type (TREE_TYPE (rhs));
}
else
{
- rhs2 = gimple_call_arg (last_stmt, 3);
- vectype1 = get_vectype_for_scalar_type (TREE_TYPE (rhs2));
+ lhs = gimple_call_lhs (last_stmt);
+ vectype1 = get_vectype_for_scalar_type (TREE_TYPE (lhs));
}
- rhs1 = gimple_call_arg (last_stmt, 2);
- rhs1_type = search_type_for_mask (rhs1, vinfo);
- if (!rhs1_type)
+ tree mask_arg = gimple_call_arg (last_stmt, mask_argno);
+ tree mask_arg_type = search_type_for_mask (mask_arg, vinfo);
+ if (!mask_arg_type)
return NULL;
- vectype2 = get_mask_type_for_scalar_type (rhs1_type);
+ vectype2 = get_mask_type_for_scalar_type (mask_arg_type);
if (!vectype1 || !vectype2
|| known_eq (TYPE_VECTOR_SUBPARTS (vectype1),
TYPE_VECTOR_SUBPARTS (vectype2)))
return NULL;
- tmp = build_mask_conversion (rhs1, vectype1, stmt_vinfo);
+ tmp = build_mask_conversion (mask_arg, vectype1, stmt_vinfo);
- if (load)
+ auto_vec<tree, 8> args;
+ unsigned int nargs = gimple_call_num_args (last_stmt);
+ args.safe_grow (nargs);
+ for (unsigned int i = 0; i < nargs; ++i)
+ args[i] = ((int) i == mask_argno
+ ? tmp
+ : gimple_call_arg (last_stmt, i));
+ pattern_stmt = gimple_build_call_internal_vec (ifn, args);
+
+ if (!store_p)
{
lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
- pattern_stmt
- = gimple_build_call_internal (IFN_MASK_LOAD, 3,
- gimple_call_arg (last_stmt, 0),
- gimple_call_arg (last_stmt, 1),
- tmp);
gimple_call_set_lhs (pattern_stmt, lhs);
}
- else
- pattern_stmt
- = gimple_build_call_internal (IFN_MASK_STORE, 4,
- gimple_call_arg (last_stmt, 0),
- gimple_call_arg (last_stmt, 1),
- tmp,
- gimple_call_arg (last_stmt, 3));
-
gimple_call_set_nothrow (pattern_stmt, true);
pattern_stmt_info = new_stmt_vec_info (pattern_stmt, vinfo);
set_vinfo_for_stmt (pattern_stmt, pattern_stmt_info);
- STMT_VINFO_DATA_REF (pattern_stmt_info)
- = STMT_VINFO_DATA_REF (stmt_vinfo);
- STMT_VINFO_DR_WRT_VEC_LOOP (pattern_stmt_info)
- = STMT_VINFO_DR_WRT_VEC_LOOP (stmt_vinfo);
- STMT_VINFO_GATHER_SCATTER_P (pattern_stmt_info)
- = STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo);
+ if (STMT_VINFO_DATA_REF (stmt_vinfo))
+ {
+ STMT_VINFO_DATA_REF (pattern_stmt_info)
+ = STMT_VINFO_DATA_REF (stmt_vinfo);
+ STMT_VINFO_DR_WRT_VEC_LOOP (pattern_stmt_info)
+ = STMT_VINFO_DR_WRT_VEC_LOOP (stmt_vinfo);
+ STMT_VINFO_GATHER_SCATTER_P (pattern_stmt_info)
+ = STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo);
+ }
*type_out = vectype1;
vect_pattern_detected ("vect_recog_mask_conversion_pattern", last_stmt);
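In GIMPLE terms, the pattern now applies to any internal function with a
mask argument, not just IFN_MASK_LOAD and IFN_MASK_STORE.  A hedged
sketch of the rewrite it records for a conditional function (the SSA
names and mask type are invented for illustration):

    /* Before: the mask _1 comes from an integer comparison whose vector
       boolean type has a different number of elements than the data
       vector type of the .COND_ADD result.  */
    _1 = i_4 > 0;
    x_2 = .COND_ADD (_1, a_5, b_6, c_7);

    /* After: the pattern converts the mask to the mask type matching
       the data vector type and rebuilds the call with the converted
       mask in the same argument position.  */
    patt_8 = (<new mask type>) _1;
    x_2 = .COND_ADD (patt_8, a_5, b_6, c_7);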