diff options
author | Richard Sandiford <richard.sandiford@arm.com> | 2018-07-31 14:22:01 +0000 |
---|---|---|
committer | Richard Sandiford <rsandifo@gcc.gnu.org> | 2018-07-31 14:22:01 +0000 |
commit | fef96d8e2a370e826acdf914d51c88aa2657340a (patch) | |
tree | 1f0d6677d4d581b174517778c0b522f98005c80f /gcc/tree-vect-loop.c | |
parent | dbe1b846648fad29d105e2e503120a4279a32593 (diff) | |
download | gcc-fef96d8e2a370e826acdf914d51c88aa2657340a.zip gcc-fef96d8e2a370e826acdf914d51c88aa2657340a.tar.gz gcc-fef96d8e2a370e826acdf914d51c88aa2657340a.tar.bz2 |
[11/46] Pass back a stmt_vec_info from vect_is_simple_use
This patch makes vect_is_simple_use pass back a stmt_vec_info to
those callers that want it. Most users only need the stmt_vec_info
but some need the gimple stmt too.
It's probably high time we added a class to represent "simple operands"
instead, but I have a separate series that tries to clean up how
operands are handled (with a view to allowing mixed vector sizes).
2018-07-31 Richard Sandiford <richard.sandiford@arm.com>
gcc/
* tree-vectorizer.h (vect_is_simple_use): Add an optional
stmt_vec_info * parameter before the optional gimple **.
* tree-vect-stmts.c (vect_is_simple_use): Likewise.
(process_use, vect_get_vec_def_for_operand_1): Update callers.
(vect_get_vec_def_for_operand, vectorizable_shift): Likewise.
* tree-vect-loop.c (vectorizable_reduction): Likewise.
(vectorizable_live_operation): Likewise.
* tree-vect-patterns.c (type_conversion_p): Likewise.
(vect_look_through_possible_promotion): Likewise.
(vect_recog_rotate_pattern): Likewise.
* tree-vect-slp.c (vect_get_and_check_slp_defs): Likewise.
From-SVN: r263126
Diffstat (limited to 'gcc/tree-vect-loop.c')
-rw-r--r-- | gcc/tree-vect-loop.c | 26 |
1 file changed, 13 insertions, 13 deletions
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c index e451cbc..1f8847f 100644 --- a/gcc/tree-vect-loop.c +++ b/gcc/tree-vect-loop.c @@ -6090,7 +6090,6 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi, int op_type; optab optab; tree new_temp = NULL_TREE; - gimple *def_stmt; enum vect_def_type dt, cond_reduc_dt = vect_unknown_def_type; gimple *cond_reduc_def_stmt = NULL; enum tree_code cond_reduc_op_code = ERROR_MARK; @@ -6324,13 +6323,14 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi, if (i == 0 && code == COND_EXPR) continue; - is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, - &dts[i], &tem, &def_stmt); + stmt_vec_info def_stmt_info; + is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &dts[i], &tem, + &def_stmt_info); dt = dts[i]; gcc_assert (is_simple_use); if (dt == vect_reduction_def) { - reduc_def_stmt = def_stmt; + reduc_def_stmt = def_stmt_info; reduc_index = i; continue; } @@ -6352,11 +6352,11 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi, return false; if (dt == vect_nested_cycle) - { - found_nested_cycle_def = true; - reduc_def_stmt = def_stmt; - reduc_index = i; - } + { + found_nested_cycle_def = true; + reduc_def_stmt = def_stmt_info; + reduc_index = i; + } if (i == 1 && code == COND_EXPR) { @@ -6367,11 +6367,11 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi, cond_reduc_val = ops[i]; } if (dt == vect_induction_def - && def_stmt != NULL - && is_nonwrapping_integer_induction (def_stmt, loop)) + && def_stmt_info + && is_nonwrapping_integer_induction (def_stmt_info, loop)) { cond_reduc_dt = dt; - cond_reduc_def_stmt = def_stmt; + cond_reduc_def_stmt = def_stmt_info; } } } @@ -7958,7 +7958,7 @@ vectorizable_live_operation (gimple *stmt, else { enum vect_def_type dt = STMT_VINFO_DEF_TYPE (stmt_info); - vec_lhs = vect_get_vec_def_for_operand_1 (stmt, dt); + vec_lhs = vect_get_vec_def_for_operand_1 (stmt_info, dt); gcc_checking_assert (ncopies == 1 || 
!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)); |