path: root/gcc/tree-vect-stmts.c
author     Richard Sandiford <richard.sandiford@linaro.org>  2018-07-12 13:02:00 +0000
committer  Richard Sandiford <rsandifo@gcc.gnu.org>  2018-07-12 13:02:00 +0000
commit     2c58d42c3ed599b4c2976fc173eefd8e016ea216 (patch)
tree       1e031aeefba8cc358f1917d8f4088473cf5a38d7 /gcc/tree-vect-stmts.c
parent     0936858f081b77319f8f6e5825dc86d2861d0445 (diff)
Use conditional internal functions in if-conversion
This patch uses IFN_COND_* to vectorise conditionally-executed,
potentially-trapping arithmetic, such as most floating-point ops with
-ftrapping-math.  E.g.:

    if (cond) { ... x = a + b; ... }

becomes:

    ...
    x = .COND_ADD (cond, a, b, else_value);
    ...

When this transformation is done on its own, the value of x for !cond
isn't important, so else_value is simply the target's
preferred_else_value (i.e. the value it can handle the most efficiently).

However, the patch also looks for the equivalent of:

    y = cond ? x : c;

in which the "then" value is the result of the conditionally-executed
operation and the "else" value "c" is some value that is available at x.
In that case we can instead use:

    x = .COND_ADD (cond, a, b, c);

and replace uses of y with uses of x.

The patch also looks for:

    y = !cond ? c : x;

which can be transformed in the same way.  This involved adding a new
utility function inverse_conditions_p, which was already open-coded in a
more limited way in match.pd.

2018-07-12  Richard Sandiford  <richard.sandiford@linaro.org>

gcc/
	* fold-const.h (inverse_conditions_p): Declare.
	* fold-const.c (inverse_conditions_p): New function.
	* match.pd: Use inverse_conditions_p.  Add folds of view_converts
	that test the inverse condition of a conditional internal function.
	* internal-fn.h (vectorized_internal_fn_supported_p): Declare.
	* internal-fn.c (internal_fn_mask_index): Handle conditional
	internal functions.
	(vectorized_internal_fn_supported_p): New function.
	* tree-if-conv.c: Include internal-fn.h and fold-const.h.
	(any_pred_load_store): Replace with...
	(need_to_predicate): ...this new variable.
	(redundant_ssa_names): New variable.
	(ifcvt_can_use_mask_load_store): Move initial checks to...
	(ifcvt_can_predicate): ...this new function.  Handle tree codes
	for which a conditional internal function exists.
	(if_convertible_gimple_assign_stmt_p): Use ifcvt_can_predicate
	instead of ifcvt_can_use_mask_load_store.  Update after variable
	name change.
	(predicate_load_or_store): New function, split out from
	predicate_mem_writes.
	(check_redundant_cond_expr): New function.
	(value_available_p): Likewise.
	(predicate_rhs_code): Likewise.
	(predicate_mem_writes): Rename to...
	(predicate_statements): ...this.  Use predicate_load_or_store
	and predicate_rhs_code.
	(combine_blocks, tree_if_conversion): Update after above name
	changes.
	(ifcvt_local_dce): Handle redundant_ssa_names.
	* tree-vect-patterns.c (vect_recog_mask_conversion_pattern): Handle
	general conditional functions.
	* tree-vect-stmts.c (vectorizable_call): Likewise.

gcc/testsuite/
	* gcc.dg/vect/vect-cond-arith-4.c: New test.
	* gcc.dg/vect/vect-cond-arith-5.c: Likewise.
	* gcc.target/aarch64/sve/cond_arith_1.c: New test.
	* gcc.target/aarch64/sve/cond_arith_1_run.c: Likewise.
	* gcc.target/aarch64/sve/cond_arith_2.c: Likewise.
	* gcc.target/aarch64/sve/cond_arith_2_run.c: Likewise.
	* gcc.target/aarch64/sve/cond_arith_3.c: Likewise.
	* gcc.target/aarch64/sve/cond_arith_3_run.c: Likewise.

From-SVN: r262589
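For illustration, here is a sketch of the kind of loop this patch lets the
vectoriser handle with -ftrapping-math.  It is modelled loosely on the new
vect-cond-arith tests; the function name, types and constants below are
illustrative rather than copied from the testsuite:

    /* Before this patch, if-conversion could not predicate the
       conditionally-executed addition, because a[i] + b[i] may trap on
       iterations where cond[i] is zero.  With the patch, the addition and
       the following selection of res are combined into a single conditional
       internal function call, roughly:
	 res = .COND_ADD (cond[i] != 0, a[i], b[i], 3.0);
       which the loop vectoriser can then handle, e.g. with SVE predication.  */
    void
    f (double *restrict dest, const double *restrict a,
       const double *restrict b, const int *restrict cond, int n)
    {
      for (int i = 0; i < n; ++i)
	{
	  double res = 3.0;
	  if (cond[i])
	    res = a[i] + b[i];
	  dest[i] = res;
	}
    }

Here the constant 3.0 plays the role of "c" above: it is available at the
point of the addition, so it can be used directly as the .COND_ADD else
value and uses of the selected result can be replaced with the call result,
rather than falling back to the target's preferred_else_value.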
Diffstat (limited to 'gcc/tree-vect-stmts.c')
-rw-r--r--  gcc/tree-vect-stmts.c  100
1 file changed, 70 insertions, 30 deletions
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index 73b81e1..1c847ae 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -3126,12 +3126,14 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
vec_info *vinfo = stmt_info->vinfo;
tree fndecl, new_temp, rhs_type;
- enum vect_def_type dt[3]
- = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
- int ndts = 3;
+ enum vect_def_type dt[4]
+ = { vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type,
+ vect_unknown_def_type };
+ int ndts = ARRAY_SIZE (dt);
gimple *new_stmt = NULL;
int ncopies, j;
- vec<tree> vargs = vNULL;
+ auto_vec<tree, 8> vargs;
+ auto_vec<tree, 8> orig_vargs;
enum { NARROW, NONE, WIDEN } modifier;
size_t i, nargs;
tree lhs;
@@ -3170,22 +3172,38 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
/* Bail out if the function has more than three arguments, we do not have
interesting builtin functions to vectorize with more than two arguments
except for fma. No arguments is also not good. */
- if (nargs == 0 || nargs > 3)
+ if (nargs == 0 || nargs > 4)
return false;
/* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */
- if (gimple_call_internal_p (stmt)
- && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
+ combined_fn cfn = gimple_call_combined_fn (stmt);
+ if (cfn == CFN_GOMP_SIMD_LANE)
{
nargs = 0;
rhs_type = unsigned_type_node;
}
+ int mask_opno = -1;
+ if (internal_fn_p (cfn))
+ mask_opno = internal_fn_mask_index (as_internal_fn (cfn));
+
for (i = 0; i < nargs; i++)
{
tree opvectype;
op = gimple_call_arg (stmt, i);
+ if (!vect_is_simple_use (op, vinfo, &dt[i], &opvectype))
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "use not simple.\n");
+ return false;
+ }
+
+ /* Skip the mask argument to an internal function. This operand
+ has been converted via a pattern if necessary. */
+ if ((int) i == mask_opno)
+ continue;
/* We can only handle calls with arguments of the same type. */
if (rhs_type
@@ -3199,14 +3217,6 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
if (!rhs_type)
rhs_type = TREE_TYPE (op);
- if (!vect_is_simple_use (op, vinfo, &dt[i], &opvectype))
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "use not simple.\n");
- return false;
- }
-
if (!vectype_in)
vectype_in = opvectype;
else if (opvectype
@@ -3264,7 +3274,6 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
to vectorize other operations in the loop. */
fndecl = NULL_TREE;
internal_fn ifn = IFN_LAST;
- combined_fn cfn = gimple_call_combined_fn (stmt);
tree callee = gimple_call_fndecl (stmt);
/* First try using an internal function. */
@@ -3328,6 +3337,7 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
needs to be generated. */
gcc_assert (ncopies >= 1);
+ vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
@@ -3337,6 +3347,13 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
record_stmt_cost (cost_vec, ncopies / 2,
vec_promote_demote, stmt_info, 0, vect_body);
+ if (loop_vinfo && mask_opno >= 0)
+ {
+ unsigned int nvectors = (slp_node
+ ? SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node)
+ : ncopies);
+ vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype_out);
+ }
return true;
}
@@ -3349,25 +3366,24 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
scalar_dest = gimple_call_lhs (stmt);
vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
+ bool masked_loop_p = loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
+
prev_stmt_info = NULL;
if (modifier == NONE || ifn != IFN_LAST)
{
tree prev_res = NULL_TREE;
+ vargs.safe_grow (nargs);
+ orig_vargs.safe_grow (nargs);
for (j = 0; j < ncopies; ++j)
{
/* Build argument list for the vectorized call. */
- if (j == 0)
- vargs.create (nargs);
- else
- vargs.truncate (0);
-
if (slp_node)
{
auto_vec<vec<tree> > vec_defs (nargs);
vec<tree> vec_oprnds0;
for (i = 0; i < nargs; i++)
- vargs.quick_push (gimple_call_arg (stmt, i));
+ vargs[i] = gimple_call_arg (stmt, i);
vect_get_slp_defs (vargs, slp_node, &vec_defs);
vec_oprnds0 = vec_defs[0];
@@ -3382,6 +3398,9 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
}
if (modifier == NARROW)
{
+ /* We don't define any narrowing conditional functions
+ at present. */
+ gcc_assert (mask_opno < 0);
tree half_res = make_ssa_name (vectype_in);
gcall *call
= gimple_build_call_internal_vec (ifn, vargs);
@@ -3400,6 +3419,17 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
}
else
{
+ if (mask_opno >= 0 && masked_loop_p)
+ {
+ unsigned int vec_num = vec_oprnds0.length ();
+ /* Always true for SLP. */
+ gcc_assert (ncopies == 1);
+ tree mask = vect_get_loop_mask (gsi, masks, vec_num,
+ vectype_out, i);
+ vargs[mask_opno] = prepare_load_store_mask
+ (TREE_TYPE (mask), mask, vargs[mask_opno], gsi);
+ }
+
gcall *call;
if (ifn != IFN_LAST)
call = gimple_build_call_internal_vec (ifn, vargs);
@@ -3429,17 +3459,22 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
vec_oprnd0
= vect_get_vec_def_for_operand (op, stmt);
else
- {
- vec_oprnd0 = gimple_call_arg (new_stmt, i);
- vec_oprnd0
- = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
- }
+ vec_oprnd0
+ = vect_get_vec_def_for_stmt_copy (dt[i], orig_vargs[i]);
- vargs.quick_push (vec_oprnd0);
+ orig_vargs[i] = vargs[i] = vec_oprnd0;
+ }
+
+ if (mask_opno >= 0 && masked_loop_p)
+ {
+ tree mask = vect_get_loop_mask (gsi, masks, ncopies,
+ vectype_out, j);
+ vargs[mask_opno]
+ = prepare_load_store_mask (TREE_TYPE (mask), mask,
+ vargs[mask_opno], gsi);
}
- if (gimple_call_internal_p (stmt)
- && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
+ if (cfn == CFN_GOMP_SIMD_LANE)
{
tree cst = build_index_vector (vectype_out, j * nunits_out, 1);
tree new_var
@@ -3451,6 +3486,9 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
}
else if (modifier == NARROW)
{
+ /* We don't define any narrowing conditional functions at
+ present. */
+ gcc_assert (mask_opno < 0);
tree half_res = make_ssa_name (vectype_in);
gcall *call = gimple_build_call_internal_vec (ifn, vargs);
gimple_call_set_lhs (call, half_res);
@@ -3490,6 +3528,8 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
}
else if (modifier == NARROW)
{
+ /* We don't define any narrowing conditional functions at present. */
+ gcc_assert (mask_opno < 0);
for (j = 0; j < ncopies; ++j)
{
/* Build argument list for the vectorized call. */